In [1]:
from PythonWrapper.tree_based_regression import *
%pylab inline
%load_ext cythonmagic
In [2]:
def data_augmentation(images, landmarks, bounding_boxes, R):
    import random
    # Express each ground-truth shape in its face box's unit coordinate frame
    # (box format [x1, y1, x2, y2]), so shapes can be transferred across faces.
    normalized_landmarks = []
    for face, shape in zip(bounding_boxes, landmarks):
        normalized_landmarks.append((shape - face[:2]) / (face[2:] - np.asarray(face[:2])))
    s0 = []      # initial shapes
    s_star = []  # ground-truth shapes, repeated R times each
    imgs = []
    faces = []
    for i in range(len(images)):
        face = bounding_boxes[i]
        # Sample R normalized shapes from the whole set and map them into this face's box.
        s0 += [s * (face[2:] - np.asarray(face[:2])) + face[:2] for s in random.sample(normalized_landmarks, R)]
        s_star += [landmarks[i]] * R
        imgs += [images[i]] * R
        faces += [bounding_boxes[i]] * R
    return np.asarray(imgs), np.asarray(s0), np.asarray(s_star), faces
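A quick way to verify the box normalization above is a round trip: mapping a shape into a face box's unit coordinates and back should reproduce it exactly. A minimal sketch with made-up values (assuming the [x1, y1, x2, y2] box format implied by face[:2]/face[2:]):
In [ ]:
face = np.array([10., 20., 110., 140.])     # hypothetical face box
shape = np.array([[30., 50.], [80., 90.]])  # two hypothetical landmarks
normalized = (shape - face[:2]) / (face[2:] - face[:2])
restored = normalized * (face[2:] - face[:2]) + face[:2]
assert np.allclose(restored, shape)
print normalized  # coordinates relative to the box, in [0, 1]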
In [3]:
from landmarks_datasets import BioId
training_set_size = 900
dataset = BioId()
images = dataset.loadImages()[:training_set_size]
ground_truth = dataset.loadGroundTruth()[:training_set_size]
bounding_boxes = dataset.loadBoundingBoxes()[:training_set_size]
print "Number of test images: %d"%training_set_size
In [ ]:
# Hyper-parameters (interpretation inferred from usage): T cascade stages,
# N trees per landmark per stage (~300-tree budget over BioID's 20 landmarks),
# trees of depth D, and R augmented initial shapes per image.
T = 5
N = 300 / 20
D = 5
R = 20
In [ ]:
imgs, s0, s_star, faces = data_augmentation(images, ground_truth, bounding_boxes, R)
print "Total training data after data augmentation: %d"%len(s0)
In [ ]:
from PythonWrapper.tree_based_regression import AlignmentMethodTraining, node_separation_criterias
local_trees_training = AlignmentMethodTraining(R, T, N, D)
lbf_training = AlignmentMethodTraining(R, T, N, D, method="lbf")
In [ ]:
local_trees_training.train(s0, s_star, faces, imgs, (0,1), "local_trees_regression_model_bioid.txt")
In [7]:
lbf_training.train(s0, s_star, faces, imgs, (0,1), "lbf_regression_model_bioid.txt")
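The index pair passed to train() — (0, 1) here, and (36, 45) or (19, 28) for the 300-W models below — appears to select the two eye landmarks used to normalize the alignment error by the inter-ocular distance; (36, 45) are the outer eye corners in the 68-point 300-W annotation. A hypothetical sketch of that metric (the real computation lives inside the wrapped training code):
In [ ]:
def mean_normalized_error(predicted, ground_truth, eye_indices):
    # Mean point-to-point distance between shapes, normalized by the
    # inter-ocular distance of the ground truth (a common alignment metric).
    left, right = eye_indices
    interocular = np.sqrt(((ground_truth[left] - ground_truth[right]) ** 2).sum())
    distances = np.sqrt(((predicted - ground_truth) ** 2).sum(axis=1))
    return distances.mean() / interocular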
In [3]:
from landmarks_datasets import Dataset300W
dataset = Dataset300W()
images, ground_truth, bounding_boxes = dataset.loadTrainingSet()
print "Size of training set: %d"%images.shape[0]
T = 5
N = 300 / 60
D = 5
R = 20
imgs, s0, s_star, faces = data_augmentation(images, ground_truth, bounding_boxes, R)
print "Total training data after data augmentation: %d"%len(s0)
In [4]:
from PythonWrapper.tree_based_regression import AlignmentMethodTraining, node_separation_criterias
local_trees_training = AlignmentMethodTraining(R, T, N, D)
lbf_training = AlignmentMethodTraining(R, T, N, D, method="lbf")
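node_separation_criterias, imported above, maps criterion names to the node-splitting rules that the comparison loops below iterate over; listing its keys shows which models will be trained:
In [ ]:
print "Available node separation criteria:"
for name in sorted(node_separation_criterias.keys()):
    print " - " + name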
In [7]:
# Compare the available node-splitting criteria, saving one model per criterion.
for criteria in node_separation_criterias.keys():
    print "Training using the " + criteria + " criterion..."
    local_trees_training.train(s0, s_star, faces, imgs, (36, 45),
                               "models/alignment/criterias/local_trees_regression_model_300w_" + criteria.replace(" ", "_") + ".txt",
                               criteria)
In [5]:
local_trees_training.train(s0, s_star, faces, imgs, (36,45), "local_trees_regression_model_300w.txt")
In [5]:
for criteria in node_separation_criterias.keys():
    print "Training using the " + criteria + " criterion..."
    lbf_training.train(s0, s_star, faces, imgs, (36, 45),
                       "models/alignment/criterias/lbf_regression_model_300w_" + criteria.replace(" ", "_") + ".txt",
                       criteria)
In [ ]:
lbf_training.train(s0, s_star, faces, imgs, (36,45), "lbf_regression_precise_model_300w.txt")
In [3]:
from landmarks_datasets import Dataset300W
dataset = Dataset300W()
images, ground_truth, bounding_boxes = dataset.loadTrainingSet(detector = "opencv")
print "Size of training set: %d"%images.shape[0]
T = 5
N = 300 / 60
D = 5
R = 20
imgs, s0, s_star, faces = data_augmentation(images, ground_truth, bounding_boxes, R)
print "Total training data after data augmentation: %d"%len(s0)
from PythonWrapper.tree_based_regression import AlignmentMethodTraining, node_separation_criterias
local_trees_training = AlignmentMethodTraining(R, T, N, D)
lbf_training = AlignmentMethodTraining(R, T, N, D, method="lbf")
In [4]:
local_trees_training.train(s0, s_star, faces, imgs, (36,45), "local_trees_regression_model_300w_opencv_detector.txt")
In [5]:
lbf_training.train(s0, s_star, faces, imgs, (36,45), "lbf_regression_model_300w_opencv_detector.txt")
In [3]:
from landmarks_datasets import Dataset300W
dataset = Dataset300W()
images, ground_truth, bounding_boxes = dataset.loadTrainingSet(contour_landmarks = False)
print "Training set size: %d"%images.shape[0]
T = 5
N = 300 / 50
D = 5
R = 20
imgs, s0, s_star, faces = data_augmentation(images, ground_truth, bounding_boxes, R)
print "Training set size after data augmentation: %d"%len(s0)
from PythonWrapper.tree_based_regression import AlignmentMethodTraining, node_separation_criterias
local_trees_training = AlignmentMethodTraining(R, T, N, D)
lbf_training = AlignmentMethodTraining(R, T, N, D, method="lbf")
In [4]:
# With the 17 contour landmarks dropped, the eye-corner indices shift from (36, 45) to (19, 28).
local_trees_training.train(s0, s_star, faces, imgs, (19,28), "local_trees_regression_model_300w_51_landmarks.txt")
In [5]:
lbf_training.train(s0, s_star, faces, imgs, (19,28), "lbf_regression_model_300w_51_landmarks.txt")
In [3]:
from landmarks_datasets import Dataset300W
dataset = Dataset300W()
images, ground_truth, bounding_boxes = dataset.loadTrainingSet(detector = "opencv", contour_landmarks = False)
print "Training set size: %d"%images.shape[0]
T = 5
N = 300 / 50
D = 5
R = 20
imgs, s0, s_star, faces = data_augmentation(images, ground_truth, bounding_boxes, R)
print "Training set size after data augmentation: %d"%len(s0)
from PythonWrapper.tree_based_regression import AlignmentMethodTraining, node_separation_criterias
local_trees_training = AlignmentMethodTraining(R, T, N, D)
lbf_training = AlignmentMethodTraining(R, T, N, D, method="lbf")
In [4]:
local_trees_training.train(s0, s_star, faces, imgs, (19,28), "local_trees_regression_model_300w_51_landmarks_opencv_detector.txt")
In [ ]:
lbf_training.train(s0, s_star, faces, imgs, (19,28), "lbf_regression_model_300w_51_landmarks_opencv_detector.txt")
In [3]:
from landmarks_datasets import Dataset300W
dataset = Dataset300W()
images, ground_truth, bounding_boxes = dataset.loadCompleteDataset(detector = "opencv")
print "Size of training set: %d"%images.shape[0]
T = 5
N = 300 / 60
D = 5
R = 20
imgs, s0, s_star, faces = data_augmentation(images, ground_truth, bounding_boxes, R)
print "Total training data after data augmentation: %d"%len(s0)
from PythonWrapper.tree_based_regression import AlignmentMethodTraining
lbf_training = AlignmentMethodTraining(R, T, N, D, method="lbf")
lbf_training.train(s0, s_star, faces, imgs, (36,45), "lbf_regression_model_68_landmarks.txt")
In [3]:
from landmarks_datasets import Dataset300W
dataset = Dataset300W()
images, ground_truth, bounding_boxes = dataset.loadCompleteDataset(detector = "opencv", contour_landmarks = False)
print "Size of training set: %d"%images.shape[0]
T = 5
N = 300 / 50
D = 5
R = 20
imgs, s0, s_star, faces = data_augmentation(images, ground_truth, bounding_boxes, R)
print "Total training data after data augmentation: %d"%len(s0)
from PythonWrapper.tree_based_regression import AlignmentMethodTraining
lbf_training = AlignmentMethodTraining(R, T, N, D, method="lbf")
lbf_training.train(s0, s_star, faces, imgs, (19,28), "lbf_regression_model_51_landmarks.txt")
In [4]:
from landmarks_datasets import Dataset300W
dataset = Dataset300W()
images, ground_truth, bounding_boxes = dataset.loadCompleteDataset(detector = "perfect")
print "Size of training set: %d"%images.shape[0]
T = 5
N = 300 / 60
D = 5
R = 20
imgs, s0, s_star, faces = data_augmentation(images, ground_truth, bounding_boxes, R)
print "Total training data after data augmentation: %d"%len(s0)
from PythonWrapper.tree_based_regression import AlignmentMethodTraining
lbf_training = AlignmentMethodTraining(R, T, N, D, method="lbf")
lbf_training.train(s0, s_star, faces, imgs, (36,45), "lbf_regression_model_68_landmarks_perfect_detector.txt")
del images, ground_truth, bounding_boxes
In [5]:
from landmarks_datasets import Dataset300W
dataset = Dataset300W()
images, ground_truth, bounding_boxes = dataset.loadCompleteDataset(detector = "perfect", contour_landmarks = False)
print "Size of training set: %d"%images.shape[0]
T = 5
N = 300 / 50
D = 5
R = 20
imgs, s0, s_star, faces = data_augmentation(images, ground_truth, bounding_boxes, R)
print "Total training data after data augmentation: %d"%len(s0)
from PythonWrapper.tree_based_regression import AlignmentMethodTraining
lbf_training = AlignmentMethodTraining(R, T, N, D, method="lbf")
lbf_training.train(s0, s_star, faces, imgs, (19,28), "lbf_regression_model_51_landmarks_perfect_detector.txt")
del images, ground_truth, bounding_boxes